bitkeeper revision 1.994.1.3 (40d7ff06C_ErgacoeY2JZXnEr4soPQ)
author kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Tue, 22 Jun 2004 09:42:30 +0000 (09:42 +0000)
committer kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Tue, 22 Jun 2004 09:42:30 +0000 (09:42 +0000)
Remove PAGE_IO/pte_io hack from Linux.

.rootkeys
linux-2.4.26-xen-sparse/arch/xen/drivers/dom0/core.c
linux-2.4.26-xen-sparse/arch/xen/mm/ioremap.c
linux-2.4.26-xen-sparse/include/asm-xen/pgtable-2level.h
linux-2.4.26-xen-sparse/include/asm-xen/pgtable.h
linux-2.4.26-xen-sparse/mm/memory.c
linux-2.4.26-xen-sparse/mm/vmalloc.c [deleted file]

index dffa81d6d47f711a92585fe2d99a9bfe0b472cfe..f8f629df372fd2a5ec5dc6cd7cb35819e1f7209c 100644 (file)
--- a/.rootkeys
+++ b/.rootkeys
 3e5a4e681xMPdF9xCMwpyfuYMySU5g linux-2.4.26-xen-sparse/mm/mremap.c
 409ba2e7akOFqQUg6Qyg2s28xcXiMg linux-2.4.26-xen-sparse/mm/page_alloc.c
 3e5a4e683HKVU-sxtagrDasRB8eBVw linux-2.4.26-xen-sparse/mm/swapfile.c
-3f108af81Thhb242EmKjGCYkjx-GJA linux-2.4.26-xen-sparse/mm/vmalloc.c
 3f776bd1Hy9rn69ntXBhPReUFw9IEA tools/Makefile
 3e6377b24eQqYMsDi9XrFkIgTzZ47A tools/balloon/Makefile
 3e6377d6eiFjF1hHIS6JEIOFk62xSA tools/balloon/README
index 5412a285729609a75d480e3a6b2b95b770171e05..99f84d99fde629037a1762991a8aff0874cf1a6b 100644 (file)
@@ -157,10 +157,10 @@ static int privcmd_ioctl(struct inode *inode, struct file *file,
         addr = m.addr;
         for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
         {
-            if ( get_user(mfn, p) ) return -EFAULT;
+            if ( get_user(mfn, p) )
+                return -EFAULT;
 
-            v->val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot) |
-                _PAGE_IO;
+            v->val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot);
 
             __direct_remap_area_pages(vma->vm_mm,
                                       addr, 
index a9b0faa41da1dfdb3c8fd6f2459f051caff60de9..1eaee72f48b7ba8495dc5b3ac8039350cdc575f5 100644 (file)
@@ -150,7 +150,7 @@ int direct_remap_area_pages(struct mm_struct *mm,
          * Fill in the machine address: PTE ptr is done later by
          * __direct_remap_area_pages(). 
          */
-        v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot) | _PAGE_IO;
+        v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);
 
         machine_addr += PAGE_SIZE;
         address += PAGE_SIZE; 
@@ -262,8 +262,7 @@ void __init *bt_ioremap(unsigned long machine_addr, unsigned long size)
          */
         idx = FIX_BTMAP_BEGIN;
         while (nrpages > 0) {
-                __set_fixmap(idx, machine_addr, 
-                             __pgprot(__PAGE_KERNEL|_PAGE_IO));
+                __set_fixmap(idx, machine_addr, PAGE_KERNEL);
                 machine_addr += PAGE_SIZE;
                 --idx;
                 --nrpages;
index 162ba1fbed03cd752413b36b4a7690999cd52989..e6845abc867b7d1f8c888e8d139144921055bbff 100644 (file)
@@ -48,7 +48,26 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
 }
 
 #define pte_same(a, b)         ((a).pte_low == (b).pte_low)
-#define pte_page(x)            (mem_map+((unsigned long)((pte_val(x) >> PAGE_SHIFT))))
+
+/*                                 
+ * We detect special mappings in one of two ways:
+ *  1. If the MFN is an I/O page then Xen will set the m2p entry
+ *     to be outside our maximum possible pseudophys range.
+ *  2. If the MFN belongs to a different domain then we will certainly
+ *     not have MFN in our p2m table. Conversely, if the page is ours,
+ *     then we'll have p2m(m2p(MFN))==MFN.
+ * If we detect a special mapping then it doesn't have a 'struct page'.
+ * We force !VALID_PAGE() by returning an out-of-range pointer.
+ */
+#define pte_page(_pte)                                        \
+({                                                            \
+    unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT;         \
+    unsigned long pfn = mfn_to_pfn(mfn);                      \
+    if ( (pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn) )     \
+        pfn = max_mapnr; /* special: force !VALID_PAGE() */    \
+    &mem_map[pfn];                                            \
+})
+
 #define pte_none(x)            (!(x).pte_low)
 #define __mk_pte(page_nr,pgprot) __pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
 
index 38721e4cfff61be483d443e91e66d4829708d9f3..dc25864d2cda858f0c2eaaefde5e7a325f1496fa 100644 (file)
@@ -116,7 +116,6 @@ extern void * high_memory;
 #define _PAGE_BIT_DIRTY                6
 #define _PAGE_BIT_PSE          7       /* 4 MB (or 2MB) page, Pentium+, if present.. */
 #define _PAGE_BIT_GLOBAL       8       /* Global TLB entry PPro+ */
-#define _PAGE_BIT_IO            9
 
 #define _PAGE_PRESENT  0x001
 #define _PAGE_RW       0x002
@@ -127,7 +126,6 @@ extern void * high_memory;
 #define _PAGE_DIRTY    0x040
 #define _PAGE_PSE      0x080   /* 4 MB (or 2MB) page, Pentium+, if present.. */
 #define _PAGE_GLOBAL   0x100   /* Global TLB entry PPro+ */
-#define _PAGE_IO        0x200
 
 #define _PAGE_PROTNONE 0x080   /* If not present */
 
@@ -200,7 +198,6 @@ static inline int pte_exec(pte_t pte)               { return (pte).pte_low & _PAGE_USER; }
 static inline int pte_dirty(pte_t pte)         { return (pte).pte_low & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)         { return (pte).pte_low & _PAGE_ACCESSED; }
 static inline int pte_write(pte_t pte)         { return (pte).pte_low & _PAGE_RW; }
-static inline int pte_io(pte_t pte)            { return (pte).pte_low & _PAGE_IO; }
 
 static inline pte_t pte_rdprotect(pte_t pte)   { (pte).pte_low &= ~_PAGE_USER; return pte; }
 static inline pte_t pte_exprotect(pte_t pte)   { (pte).pte_low &= ~_PAGE_USER; return pte; }
@@ -212,7 +209,6 @@ static inline pte_t pte_mkexec(pte_t pte)   { (pte).pte_low |= _PAGE_USER; return
 static inline pte_t pte_mkdirty(pte_t pte)     { (pte).pte_low |= _PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte)     { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkwrite(pte_t pte)     { (pte).pte_low |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkio(pte_t pte)                { (pte).pte_low |= _PAGE_IO; return pte; }
 
 static inline int ptep_test_and_clear_dirty(pte_t *ptep)
 {
index 312dbfa7c0f85a6b61efe451fa1e3fed24dc5b55..6bdb08afe741d06db956b5c32ebe4454279b9628 100644 (file)
@@ -318,12 +318,6 @@ static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long ad
                        continue;
                if (pte_present(pte)) {
                        struct page *page = pte_page(pte);
-#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
-                       if (pte_io(pte)) {
-                               queue_l1_entry_update(ptep, 0);
-                               continue;
-                       }
-#endif
                        if (VALID_PAGE(page) && !PageReserved(page))
                                freed ++;
                        /* This will eventually call __free_pte on the pte. */
diff --git a/linux-2.4.26-xen-sparse/mm/vmalloc.c b/linux-2.4.26-xen-sparse/mm/vmalloc.c
deleted file mode 100644 (file)
index b030270..0000000
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- *  linux/mm/vmalloc.c
- *
- *  Copyright (C) 1993  Linus Torvalds
- *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
- */
-
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/spinlock.h>
-#include <linux/highmem.h>
-#include <linux/smp_lock.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-
-rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
-struct vm_struct * vmlist;
-
-static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
-{
-       pte_t * pte;
-       unsigned long end;
-
-       if (pmd_none(*pmd))
-               return;
-       if (pmd_bad(*pmd)) {
-               pmd_ERROR(*pmd);
-               pmd_clear(pmd);
-               return;
-       }
-       pte = pte_offset(pmd, address);
-       address &= ~PMD_MASK;
-       end = address + size;
-       if (end > PMD_SIZE)
-               end = PMD_SIZE;
-       do {
-               pte_t page;
-               page = ptep_get_and_clear(pte);
-               address += PAGE_SIZE;
-               pte++;
-               if (pte_none(page))
-                       continue;
-               if (pte_present(page)) {
-                       struct page *ptpage = pte_page(page);
-#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
-                       if (pte_io(page))
-                               continue;
-#endif
-                       if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
-                               __free_page(ptpage);
-                       continue;
-               }
-               printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
-       } while (address < end);
-}
-
-static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
-{
-       pmd_t * pmd;
-       unsigned long end;
-
-       if (pgd_none(*dir))
-               return;
-       if (pgd_bad(*dir)) {
-               pgd_ERROR(*dir);
-               pgd_clear(dir);
-               return;
-       }
-       pmd = pmd_offset(dir, address);
-       address &= ~PGDIR_MASK;
-       end = address + size;
-       if (end > PGDIR_SIZE)
-               end = PGDIR_SIZE;
-       do {
-               free_area_pte(pmd, address, end - address);
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address < end);
-}
-
-void vmfree_area_pages(unsigned long address, unsigned long size)
-{
-       pgd_t * dir;
-       unsigned long end = address + size;
-
-       dir = pgd_offset_k(address);
-       flush_cache_all();
-       do {
-               free_area_pmd(dir, address, end - address);
-               address = (address + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       } while (address && (address < end));
-       flush_tlb_all();
-}
-
-static inline int alloc_area_pte (pte_t * pte, unsigned long address,
-                       unsigned long size, int gfp_mask,
-                       pgprot_t prot, struct page ***pages)
-{
-       unsigned long end;
-
-       address &= ~PMD_MASK;
-       end = address + size;
-       if (end > PMD_SIZE)
-               end = PMD_SIZE;
-       do {
-               struct page * page;
-
-               if (!pages) {
-                       spin_unlock(&init_mm.page_table_lock);
-                       page = alloc_page(gfp_mask);
-                       spin_lock(&init_mm.page_table_lock);
-               } else {
-                       page = (**pages);
-                       (*pages)++;
-
-                       /* Add a reference to the page so we can free later */
-                       if (page)
-                               atomic_inc(&page->count);
-
-               }
-               if (!pte_none(*pte))
-                       printk(KERN_ERR "alloc_area_pte: page already exists\n");
-               if (!page)
-                       return -ENOMEM;
-               set_pte(pte, mk_pte(page, prot));
-               address += PAGE_SIZE;
-               pte++;
-       } while (address < end);
-       return 0;
-}
-
-static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address,
-                       unsigned long size, int gfp_mask,
-                       pgprot_t prot, struct page ***pages)
-{
-       unsigned long end;
-
-       address &= ~PGDIR_MASK;
-       end = address + size;
-       if (end > PGDIR_SIZE)
-               end = PGDIR_SIZE;
-       do {
-               pte_t * pte = pte_alloc(&init_mm, pmd, address);
-               if (!pte)
-                       return -ENOMEM;
-               if (alloc_area_pte(pte, address, end - address,
-                                       gfp_mask, prot, pages))
-                       return -ENOMEM;
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address < end);
-       return 0;
-}
-
-static inline int __vmalloc_area_pages (unsigned long address,
-                                       unsigned long size,
-                                       int gfp_mask,
-                                       pgprot_t prot,
-                                       struct page ***pages)
-{
-       pgd_t * dir;
-       unsigned long start = address;
-       unsigned long end = address + size;
-
-       dir = pgd_offset_k(address);
-       spin_lock(&init_mm.page_table_lock);
-       do {
-               pmd_t *pmd;
-               
-               pmd = pmd_alloc(&init_mm, dir, address);
-               if (!pmd)
-                       goto err;
-
-               if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot, pages))
-                       goto err;       // The kernel NEVER reclaims pmds, so no need to undo pmd_alloc() here
-
-               address = (address + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       } while (address && (address < end));
-       spin_unlock(&init_mm.page_table_lock);
-       flush_cache_all();
-       return 0;
-err:
-       spin_unlock(&init_mm.page_table_lock);
-       flush_cache_all();
-       if (address > start)
-               vmfree_area_pages(start, address - start);
-       return -ENOMEM;
-}
-
-int vmalloc_area_pages(unsigned long address, unsigned long size,
-                      int gfp_mask, pgprot_t prot)
-{
-       return __vmalloc_area_pages(address, size, gfp_mask, prot, NULL);
-}
-
-struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
-{
-       unsigned long addr, next;
-       struct vm_struct **p, *tmp, *area;
-
-       area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
-       if (!area)
-               return NULL;
-
-       size += PAGE_SIZE;
-       if (!size) {
-               kfree (area);
-               return NULL;
-       }
-
-       addr = VMALLOC_START;
-       write_lock(&vmlist_lock);
-       for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
-               if ((size + addr) < addr)
-                       goto out;
-               if (size + addr <= (unsigned long) tmp->addr)
-                       break;
-               next = tmp->size + (unsigned long) tmp->addr;
-               if (next > addr) 
-                       addr = next;
-               if (addr > VMALLOC_END-size)
-                       goto out;
-       }
-       area->flags = flags;
-       area->addr = (void *)addr;
-       area->size = size;
-       area->next = *p;
-       *p = area;
-       write_unlock(&vmlist_lock);
-       return area;
-
-out:
-       write_unlock(&vmlist_lock);
-       kfree(area);
-       return NULL;
-}
-
-void __vfree(void * addr, int free_area_pages)
-{
-       struct vm_struct **p, *tmp;
-
-       if (!addr)
-               return;
-       if ((PAGE_SIZE-1) & (unsigned long) addr) {
-               printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
-               return;
-       }
-       write_lock(&vmlist_lock);
-       for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-               if (tmp->addr == addr) {
-                       *p = tmp->next;
-                       if (free_area_pages)
-                           vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
-                       write_unlock(&vmlist_lock);
-                       kfree(tmp);
-                       return;
-               }
-       }
-       write_unlock(&vmlist_lock);
-       printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
-}
-
-void vfree(void * addr)
-{
-       __vfree(addr,1);
-}
-
-void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot)
-{
-       void * addr;
-       struct vm_struct *area;
-
-       size = PAGE_ALIGN(size);
-       if (!size || (size >> PAGE_SHIFT) > num_physpages)
-               return NULL;
-       area = get_vm_area(size, VM_ALLOC);
-       if (!area)
-               return NULL;
-       addr = area->addr;
-       if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask,
-                                prot, NULL)) {
-               __vfree(addr, 0);
-               return NULL;
-       }
-       return addr;
-}
-
-void * vmap(struct page **pages, int count,
-           unsigned long flags, pgprot_t prot)
-{
-       void * addr;
-       struct vm_struct *area;
-       unsigned long size = count << PAGE_SHIFT;
-
-       if (!size || size > (max_mapnr << PAGE_SHIFT))
-               return NULL;
-       area = get_vm_area(size, flags);
-       if (!area) {
-               return NULL;
-       }
-       addr = area->addr;
-       if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, 0,
-                                prot, &pages)) {
-               __vfree(addr, 0);
-               return NULL;
-       }
-       return addr;
-}
-
-long vread(char *buf, char *addr, unsigned long count)
-{
-       struct vm_struct *tmp;
-       char *vaddr, *buf_start = buf;
-       unsigned long n;
-
-       /* Don't allow overflow */
-       if ((unsigned long) addr + count < count)
-               count = -(unsigned long) addr;
-
-       read_lock(&vmlist_lock);
-       for (tmp = vmlist; tmp; tmp = tmp->next) {
-               vaddr = (char *) tmp->addr;
-               if (addr >= vaddr + tmp->size - PAGE_SIZE)
-                       continue;
-               while (addr < vaddr) {
-                       if (count == 0)
-                               goto finished;
-                       *buf = '\0';
-                       buf++;
-                       addr++;
-                       count--;
-               }
-               n = vaddr + tmp->size - PAGE_SIZE - addr;
-               do {
-                       if (count == 0)
-                               goto finished;
-                       *buf = *addr;
-                       buf++;
-                       addr++;
-                       count--;
-               } while (--n > 0);
-       }
-finished:
-       read_unlock(&vmlist_lock);
-       return buf - buf_start;
-}
-
-long vwrite(char *buf, char *addr, unsigned long count)
-{
-       struct vm_struct *tmp;
-       char *vaddr, *buf_start = buf;
-       unsigned long n;
-
-       /* Don't allow overflow */
-       if ((unsigned long) addr + count < count)
-               count = -(unsigned long) addr;
-
-       read_lock(&vmlist_lock);
-       for (tmp = vmlist; tmp; tmp = tmp->next) {
-               vaddr = (char *) tmp->addr;
-               if (addr >= vaddr + tmp->size - PAGE_SIZE)
-                       continue;
-               while (addr < vaddr) {
-                       if (count == 0)
-                               goto finished;
-                       buf++;
-                       addr++;
-                       count--;
-               }
-               n = vaddr + tmp->size - PAGE_SIZE - addr;
-               do {
-                       if (count == 0)
-                               goto finished;
-                       *addr = *buf;
-                       buf++;
-                       addr++;
-                       count--;
-               } while (--n > 0);
-       }
-finished:
-       read_unlock(&vmlist_lock);
-       return buf - buf_start;
-}